Can we take the gradients of our emulators? I investigate.


In [2]:
from pearce.emulator import NashvilleHot
from GPy.kern import *
import numpy as np
from os import path
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()

In [4]:
'/scratch/users/swmclau2/xi_gg_zheng07/'


Out[4]:
'/scratch/users/swmclau2/xi_gg_zheng07/'

In [5]:
training_file = '/scratch/users/swmclau2/xi_gg_zheng07/PearceXiggCosmo.hdf5'

In [6]:
em_method = 'gp'

In [7]:
fixed_params = {'z':0.0}
hyperparams = {'kernel': (Linear(input_dim=7, ARD=True) + RBF(input_dim=7, ARD=True) + Bias(input_dim=7),
                          RBF(input_dim=4, ARD=True) + Bias(input_dim=4)),
               'optimize': True}
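
The kernels themselves are not the obstacle to analytic gradients: GPy kernels, including sums of them, implement gradients_X and gradients_X_diag, so any trouble will come from the model object wrapping them. A quick illustrative sketch of my own, with dummy inputs mirroring the cosmology kernel defined above:

In [ ]:
# Sanity check (illustrative only): the summed kernel supports the
# gradient hooks that GPy's predictive gradients rely on.
k_cosmo = Linear(input_dim=7, ARD=True) + RBF(input_dim=7, ARD=True) + Bias(input_dim=7)
X_dummy = np.random.rand(3, 7)
print(k_cosmo.gradients_X(np.ones((3, 3)), X_dummy).shape)    # -> (3, 7)
print(k_cosmo.gradients_X_diag(np.ones(3), X_dummy).shape)    # -> (3, 7)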

In [8]:
emu = NashvilleHot(training_file, hyperparams={}, fixed_params=fixed_params, downsample_factor=0.2)


 /home/users/swmclau2/.local/lib/python2.7/site-packages/paramz/transformations.py:111: RuntimeWarning:overflow encountered in expm1

In [10]:
model = emu._emulators[0]

In [18]:
bounds = [emu.get_param_bounds(name) for name in emu.get_param_names()]
x_new = np.array([np.random.uniform(b[0], b[1]) for b in bounds])

In [21]:
x_new = x_new.reshape((1,-1))

In [23]:
kern1 = model.kern1
# Adapted from GPy's GP.predictive_gradients (self -> model, Xnew -> x_new, one output dim);
# the underlying model does not appear to expose .posterior.woodbury_vector or
# ._predictive_variable the way a plain GPy GP does, so this route fails -- hence the answer below.
mean_jac = np.empty((x_new.shape[0], x_new.shape[1], 1))
mean_jac[:, :, 0] = kern1.gradients_X(model.posterior.woodbury_vector[:, 0:1].T, x_new,
                                      model._predictive_variable)
# Gradients wrt the diagonal part k_{xx}
dv_dX = kern1.gradients_X_diag(np.ones(x_new.shape[0]), x_new)
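
For contrast, when the model is a plain GPy GPRegression the same machinery is packaged as predictive_gradients and works out of the box. A toy sketch on random data (not the emulator's), just to show what the analytic route looks like when the model class supports it:

In [ ]:
import GPy
# Toy single-output GP; data and shapes are illustrative only.
X_toy = np.random.rand(20, 7)
y_toy = np.random.rand(20, 1)
toy = GPy.models.GPRegression(X_toy, y_toy, RBF(input_dim=7, ARD=True))
dmu_dX, dv_dX = toy.predictive_gradients(np.random.rand(5, 7))
print(dmu_dX.shape, dv_dX.shape)    # -> (5, 7, 1) (5, 7)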

Answer: No
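
Since the analytic route doesn't go through here, a central finite difference on the emulator's prediction is a workable fallback. A minimal sketch; the prediction hook is left abstract (a callable returning a scalar) because the exact predict call isn't shown in this notebook:

In [ ]:
def fd_gradient(predict_fn, x, eps=1e-4):
    """Central finite-difference gradient of a scalar-valued predict_fn at x."""
    x = np.asarray(x, dtype=float).ravel()
    grad = np.zeros_like(x)
    for j in range(x.size):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[j] += eps
        x_minus[j] -= eps
        grad[j] = (predict_fn(x_plus) - predict_fn(x_minus)) / (2.0 * eps)
    return grad

# Usage would look like: fd_gradient(lambda p: my_predict(p), x_new.ravel()),
# where my_predict is whatever wraps the emulator's prediction at a single point.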


In [ ]: